bitkeeper revision 1.1159.212.120 (42080fdeqkhdPXOxk9B5egncOrellQ)
author iap10@labyrinth.cl.cam.ac.uk <iap10@labyrinth.cl.cam.ac.uk>
Tue, 8 Feb 2005 01:03:26 +0000 (01:03 +0000)
committer iap10@labyrinth.cl.cam.ac.uk <iap10@labyrinth.cl.cam.ac.uk>
Tue, 8 Feb 2005 01:03:26 +0000 (01:03 +0000)
Make phys_to_machine_mapping a static inline function.

Signed-off-by: Arun Sharma <arun.sharma@intel.com>
Signed-off-by: ian@xensource.com
xen/arch/x86/shadow.c
xen/arch/x86/vmx.c
xen/arch/x86/vmx_platform.c
xen/arch/x86/vmx_vmcs.c
xen/include/asm-x86/mm.h
xen/include/asm-x86/shadow.h

index 1ac4109155daafdf043bba25a99dc108c1f4242a..3c5d2151cb05a3c939a2ec7832d75d4a8474e8ee 100644 (file)
@@ -576,7 +576,7 @@ void vmx_shadow_invlpg(struct domain *d, unsigned long va)
         return;
     }
 
-    host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
+    host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
     spte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
 
     if (__put_user(spte, (unsigned long *)
@@ -813,7 +813,7 @@ static int check_pte(
 
         if (d->arch.shadow_mode == SHM_full_32) {
 
-            guest_gpfn = phys_to_machine_mapping[gpfn];
+            guest_gpfn = phys_to_machine_mapping(gpfn);
 
             if ( __shadow_status(d, guest_gpfn) != (PSH_shadowed | spfn) )
                 FAIL("spfn problem g.sf=%08lx", 
@@ -889,7 +889,7 @@ int check_pagetable(struct domain *d, pagetable_t pt, char *s)
 
     if (d->arch.shadow_mode == SHM_full_32) 
     {
-        host_gpfn = phys_to_machine_mapping[gpfn];
+        host_gpfn = phys_to_machine_mapping(gpfn);
         gpl2e = (l2_pgentry_t *) map_domain_mem( host_gpfn << PAGE_SHIFT );
 
     } else
index 486998c34a76ec22979f0925185bc014074106d4..fb9dbbd82f7dce318fdcf08359c7200eef9f8039 100644 (file)
@@ -129,7 +129,7 @@ static int vmx_do_page_fault(unsigned long va, unsigned long error_code)
 
     index = (va >> L2_PAGETABLE_SHIFT);
     if (!l2_pgentry_val(ed->arch.guest_pl2e_cache[index])) {
-        pfn = phys_to_machine_mapping[gpde >> PAGE_SHIFT];
+        pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
 
         VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_do_page_fault: pagetable = %lx\n",
                 pagetable_val(ed->arch.pagetable));
@@ -304,7 +304,7 @@ inline unsigned long gva_to_gpa(unsigned long gva)
     __guest_get_pl2e(ed, gva, &gpde);
     index = (gva >> L2_PAGETABLE_SHIFT);
 
-    pfn = phys_to_machine_mapping[gpde >> PAGE_SHIFT];
+    pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
 
     ed->arch.guest_pl2e_cache[index] = 
             mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
@@ -451,8 +451,8 @@ static void mov_to_cr(int gp, int cr, struct xen_regs *regs)
             /*
              * The guest CR3 must be pointing to the guest physical.
              */
-            if (!(pfn = phys_to_machine_mapping[
-                      d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT])) 
+            if (!(pfn = phys_to_machine_mapping(
+                      d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT))) 
             {
                 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx\n", 
                         d->arch.arch_vmx.cpu_cr3);
@@ -504,7 +504,7 @@ static void mov_to_cr(int gp, int cr, struct xen_regs *regs)
              * removed some translation or changed page attributes.
              * We simply invalidate the shadow.
              */
-            pfn = phys_to_machine_mapping[value >> PAGE_SHIFT];
+            pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
             if ((pfn << PAGE_SHIFT) != pagetable_val(d->arch.pagetable))
                 __vmx_bug(regs);
             vmx_shadow_clear_state(d->domain);
@@ -521,7 +521,7 @@ static void mov_to_cr(int gp, int cr, struct xen_regs *regs)
                         "Invalid CR3 value=%lx\n", value);
                 domain_crash(); /* need to take a clean path */
             }
-            pfn = phys_to_machine_mapping[value >> PAGE_SHIFT];
+            pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
             vmx_shadow_clear_state(d->domain);
             d->arch.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
             shadow_mk_pagetable(d);
index 5be1c182be1a36ca1f06dbe7f6088704b904acba..f242581557442881e3f6c2dbecb44ebc5008ea5d 100644 (file)
@@ -369,7 +369,7 @@ static int inst_copy_from_guest(char *buf, unsigned long guest_eip, int inst_len
                 printk("inst_copy_from_guest- EXIT: read gpte faulted" );
                 return 0;
             }
-        mfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
+        mfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
         ma = (mfn << PAGE_SHIFT) | (guest_eip & (PAGE_SIZE - 1));
         inst_start = (unsigned char *)map_domain_mem(ma);
                 
index 72096c704365ff04ec25c6a9ecd04f824db85f33..f90c689aed6bd8f1405bb49a5ada06e186c1bb9f 100644 (file)
@@ -118,7 +118,7 @@ int vmx_setup_platform(struct exec_domain *d, execution_context_t *context)
     addr = context->edi;
     offset = (addr & ~PAGE_MASK);
     addr = round_pgdown(addr);
-    mpfn = phys_to_machine_mapping[addr >> PAGE_SHIFT];
+    mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT);
     p = map_domain_mem(mpfn << PAGE_SHIFT);
 
     e820p = (struct e820entry *) ((unsigned long) p + offset); 
@@ -136,7 +136,7 @@ int vmx_setup_platform(struct exec_domain *d, execution_context_t *context)
     }   
     unmap_domain_mem(p);        
 
-    mpfn = phys_to_machine_mapping[gpfn];
+    mpfn = phys_to_machine_mapping(gpfn);
     p = map_domain_mem(mpfn << PAGE_SHIFT);
     d->arch.arch_vmx.vmx_platform.shared_page_va = (unsigned long) p;
 
@@ -172,7 +172,7 @@ static int add_mapping_perdomain(struct exec_domain *d, unsigned long gpfn,
         d->domain->arch.mm_perdomain_pt[gpfn >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)] = 
             mk_l1_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
     }
-    phys_to_machine_mapping[gpfn] = mpfn;
+    __phys_to_machine_mapping[gpfn] = mpfn;
 
     return 0;
 }
index a16a005fb5c70bf5c9b0a4daa7d7b875e5bd8816..1ba5af9ee4c83edb921c40f8e75da186b250dc99 100644 (file)
@@ -241,8 +241,12 @@ void synchronise_pagetables(unsigned long cpu_mask);
 #undef  phys_to_machine_mapping
 
 #define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
-#define phys_to_machine_mapping ((unsigned long *)PERDOMAIN_VIRT_START)
-
+#define __phys_to_machine_mapping ((unsigned long *)PERDOMAIN_VIRT_START)
+/* Returns the machine physical */
+static inline unsigned long phys_to_machine_mapping(unsigned long pfn) 
+{
+        return __phys_to_machine_mapping[pfn];
+}
 #define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn)
 
 #define DEFAULT_GDT_ENTRIES     (LAST_RESERVED_GDT_ENTRY+1)
index c95a4de69379e7ffa1db01000c0c2ddf72ca3b1c..2ab8245332e87d80da0682a5ce148961718dfa10 100644 (file)
@@ -51,7 +51,7 @@ extern void vmx_shadow_invlpg(struct domain *, unsigned long);
 
 #define  __get_phys_to_machine(_d, host_gpfn, gpfn)    \
     if ((_d)->arch.shadow_mode == SHM_full_32)         \
-        (host_gpfn) = phys_to_machine_mapping[(gpfn)]; \
+        (host_gpfn) = phys_to_machine_mapping(gpfn);   \
     else                                               \
         (host_gpfn) = (gpfn);
 
@@ -139,7 +139,7 @@ static inline void __guest_set_pl2e(
     {
         unsigned long pfn;
 
-        pfn = phys_to_machine_mapping[value >> PAGE_SHIFT];
+        pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
         ed->arch.guest_pl2e_cache[l2_table_offset(va)] =
             mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
 
@@ -231,7 +231,7 @@ static inline void l1pte_write_fault(
     {
         unsigned long host_pfn, host_gpte;
         
-        host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
+        host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
         host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
         spte = host_gpte | _PAGE_RW;
     }
@@ -265,7 +265,7 @@ static inline void l1pte_read_fault(
     {
         unsigned long host_pfn, host_gpte;
         
-        host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
+        host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
         host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
         spte = (host_gpte & _PAGE_DIRTY) ? host_gpte : (host_gpte & ~_PAGE_RW);
     }
@@ -309,7 +309,7 @@ static inline void l1pte_propagate_from_guest(
             return;
         }
         
-        host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
+        host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
         host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
 
         if ( (host_gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==